import os
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow import keras
import matplotlib.pyplot as plt
# Root of the (reduced) chest X-ray dataset. Expects train/ and test/
# sub-directories, each containing NORMAL/ and PNEUMONIA/ class folders.
base_dir = 'chest_xray/reduced size/'

train_dir = os.path.join(base_dir, 'train')
# NOTE(review): the "test" folder is used as the validation set here — confirm
# this is intentional (a separate "val" folder is globbed later in the file).
validation_dir = os.path.join(base_dir, 'test')

# Per-class training directories.
train_NORMAL_dir = os.path.join(train_dir, 'NORMAL')
train_PNEUMONIA_dir = os.path.join(train_dir, 'PNEUMONIA')

# Per-class validation directories.
validation_NORMAL_dir = os.path.join(validation_dir, 'NORMAL')
validation_PNEUMONIA_dir = os.path.join(validation_dir, 'PNEUMONIA')

# Filename lists for the training classes (used below for previewing images).
train_NORMAL_fnames = os.listdir( train_NORMAL_dir )
train_PNEUMONIA_fnames = os.listdir( train_PNEUMONIA_dir )

# Show a few filenames as a sanity check.
print(train_NORMAL_fnames[:10])
print(train_PNEUMONIA_fnames[:10])

# Report the dataset sizes per split and class.
print('total training NORMAL images :', len(os.listdir(      train_NORMAL_dir ) ))
print('total training PNEUMONIA images :', len(os.listdir(      train_PNEUMONIA_dir ) ))

print('total validation NORMAL images :', len(os.listdir( validation_NORMAL_dir ) ))
print('total validation PNEUMONIA images :', len(os.listdir( validation_PNEUMONIA_dir ) ))
['IM-0115-0001.jpeg', 'IM-0117-0001.jpeg', 'IM-0119-0001.jpeg', 'IM-0122-0001.jpeg', 'IM-0125-0001.jpeg', 'IM-0127-0001.jpeg', 'IM-0128-0001.jpeg', 'IM-0129-0001.jpeg', 'IM-0131-0001.jpeg', 'IM-0133-0001.jpeg']
['person100_virus_184.jpeg', 'person101_virus_187.jpeg', 'person101_virus_188.jpeg', 'person102_virus_189.jpeg', 'person103_virus_190.jpeg', 'person104_virus_191.jpeg', 'person105_virus_192.jpeg', 'person105_virus_193.jpeg', 'person106_virus_194.jpeg', 'person107_virus_197.jpeg']
total training NORMAL images : 1000
total training PNEUMONIA images : 1000
total validation NORMAL images : 100
total validation PNEUMONIA images : 100
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# Parameters for our graph; we'll output images in a 4x4 configuration
nrows = 4
ncols = 4
pic_index = 0 # Index for iterating over images

# Set up matplotlib fig, and size it to fit 4x4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)

# Advance the window so the slices below pick the first 8 filenames per class.
pic_index+=8

# Full paths of the next 8 NORMAL training images.
next_NORMAL_pix = [os.path.join(train_NORMAL_dir, fname)
                for fname in train_NORMAL_fnames[ pic_index-8:pic_index]
               ]

# Full paths of the next 8 PNEUMONIA training images.
next_PNEUMONIA_pix = [os.path.join(train_PNEUMONIA_dir, fname)
                for fname in train_PNEUMONIA_fnames[ pic_index-8:pic_index]
               ]

# Draw the 16 images (8 NORMAL, then 8 PNEUMONIA) on the 4x4 grid.
for i, img_path in enumerate(next_NORMAL_pix+next_PNEUMONIA_pix):
  sp = plt.subplot(nrows, ncols, i + 1)
  sp.axis('Off')

  img = mpimg.imread(img_path)
  plt.imshow(img)
class Conv(tf.keras.Model):
    """Conv2D -> BatchNorm -> ReLU -> 2x2 max-pool: the repeated unit of the CNN."""

    def __init__(self, filters, kernel_size):
        super().__init__()
        # Attribute names are kept stable: Keras derives weight/checkpoint
        # names from them.
        self.conv = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size)
        self.bn = tf.keras.layers.BatchNormalization()
        self.relu = tf.keras.layers.ReLU()
        self.pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2))

    def call(self, inputs, training=True):
        """Run the block; `training` toggles BatchNorm batch-vs-moving statistics."""
        features = self.bn(self.conv(inputs), training=training)
        return self.pool(self.relu(features))
    
model = tf.keras.Sequential(name='X-ray_CNN')

# Four Conv blocks with growing filter counts, then a dense classifier head.
model.add(Conv(filters=32, kernel_size=(3, 3)))
model.add(Conv(filters=64, kernel_size=(3, 3)))
model.add(Conv(filters=128, kernel_size=(3, 3)))
model.add(Conv(filters=128, kernel_size=(3, 3)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=512, activation=tf.keras.activations.relu))
# Two softmax outputs pair with the sparse categorical cross-entropy loss
# below; labels are integer class ids (0 = NORMAL, 1 = PNEUMONIA, as assigned
# in the tf.data pipeline later in this file).
model.add(tf.keras.layers.Dense(units=2, activation=tf.keras.activations.softmax))

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

from glob import glob

base_dir = 'chest_xray/reduced size/'
train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len
2000
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import SGD

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Heavy augmentation for TRAINING only: random rotations, shifts, shears,
# zooms and horizontal flips, plus rescaling to [0, 1].
train_datagen = ImageDataGenerator( rescale = 1.0/255,
                                    rotation_range=40,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True,
                                    fill_mode='nearest'
                                    )
# FIX: the validation generator previously applied the same random
# augmentation as training. Validation images must only be rescaled,
# otherwise validation metrics are measured on distorted inputs.
validation_datagen  = ImageDataGenerator( rescale = 1.0/255 )
# --------------------
# NOTE(review): these Keras generators are built but model.fit later uses the
# tf.data pipeline instead — these appear to be leftovers; confirm.
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    class_mode='binary',
                                                    target_size=(150, 150))
# --------------------
# Flow validation images in batches of 20 using test_datagen generator
# --------------------
validation_generator = validation_datagen.flow_from_directory(validation_dir,
                                                         batch_size=20,
                                                         class_mode  = 'binary',
                                                         target_size = (150, 150))


checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the full model whenever val_loss improves.

callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_best_only=True,
                                                 save_weights_only=False,
                                                 verbose=1)
Found 2000 images belonging to 2 classes.
Found 200 images belonging to 2 classes.
import numpy as np

def load(f, label):
    """Read an image file from disk and return (float32 image tensor, label)."""
    raw = tf.io.read_file(f)            # raw JPEG bytes
    decoded = tf.image.decode_jpeg(raw) # decode; channel count comes from the file
    return tf.cast(decoded, tf.float32), label

def resize(input_image, size):
    """Resize *input_image* to the given (height, width) *size*."""
    resized = tf.image.resize(input_image, size)
    return resized

def random_crop(input_image, crop_size=(150, 150, 1)):
    """Randomly crop *input_image* to *crop_size* (height, width, channels).

    The crop size was previously hard-coded; it is now a parameter whose
    default preserves the original 150x150 single-channel behavior.
    """
    return tf.image.random_crop(input_image, size=list(crop_size))

def central_crop(input_image):
    """Deterministic center crop to 150x150 after resizing to 176x176.

    FIX: central_fraction=0.84 produced a 148x148 crop (176 * 0.84 = 147.84),
    silently disagreeing with the 150x150 random crop used for training.
    150/176 yields exactly 150x150, matching the training input size.
    """
    image = resize(input_image, [176, 176])
    return tf.image.central_crop(image, central_fraction=150 / 176)

def random_rotation(input_image):
    """Rotate the image by a random multiple of 90 degrees.

    FIX: the angle was drawn with np.random, which inside a tf.data map runs
    once at graph-trace time — every image got the same rotation. Drawing
    with tf.random.uniform happens per element inside the graph. The range
    is also widened to {0, 1, 2, 3} so a 270-degree rotation is possible
    (np.random.randint(0, 3) never produced k=3).
    """
    k = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
    return tf.image.rot90(input_image, k=k)

def random_jitter(input_image):
    """Training-time augmentation: resize, random crop, random rot90, random flip."""
    # Upscale to 176x176 so the subsequent crop has room to jitter.
    image = resize(input_image, [176, 176])
    # Random 150x150 crop (single channel — these are grayscale X-rays).
    image = random_crop(image)
    # Random rotation by a multiple of 90 degrees.
    image = random_rotation(image)
    # Random horizontal mirroring.
    image = tf.image.random_flip_left_right(image)
    return image

def normalize(input_image):
    """Scale pixels to roughly [-1, 1] around the per-image mid-intensity.

    mid = (max + min) / 2, so dividing by it and subtracting 1 maps
    [min, max] into about [-1, 1] for non-negative pixel data.
    FIX: guard against an all-zero image, where mid == 0 would produce
    inf/nan; for any real image the epsilon has no effect.
    """
    mid = (tf.reduce_max(input_image) + tf.reduce_min(input_image)) / 2
    mid = tf.maximum(mid, 1e-6)
    return input_image / mid - 1

def load_image_train(image_file, label):
    """Load one training example, then augment and normalize it."""
    image, label = load(image_file, label)
    return normalize(random_jitter(image)), label

def load_image_val(image_file, label):
    """Load one validation/test example with deterministic preprocessing."""
    image, label = load(image_file, label)
    return normalize(central_crop(image)), label
def _labeled_files_ds(split):
    """Build a (filepath, label) dataset for one split; NORMAL -> 0, PNEUMONIA -> 1."""
    normal = tf.data.Dataset.list_files(
        os.path.join("chest_xray/reduced size", split, 'NORMAL', '*.jpeg'))
    pneumonia = tf.data.Dataset.list_files(
        os.path.join("chest_xray/reduced size", split, 'PNEUMONIA', '*.jpeg'))
    return normal.map(lambda x: (x, 0)).concatenate(pneumonia.map(lambda x: (x, 1)))

# The list_files/map/concatenate construction was previously copy-pasted
# three times (train/val/test); the helper removes the duplication and the
# throwaway temp_ds/temp2_ds names.

# Training pipeline: shuffle over the whole file list, augment, batch, repeat.
train_ds = _labeled_files_ds('train')
buffer_size = tf.data.experimental.cardinality(train_ds).numpy()
train_ds = train_ds.shuffle(buffer_size)\
                   .map(load_image_train, num_parallel_calls=16)\
                   .batch(20)\
                   .repeat()

# Validation pipeline: deterministic preprocessing, no shuffling.
val_ds = _labeled_files_ds('val')\
             .map(load_image_val, num_parallel_calls=16)\
             .batch(20)\
             .repeat()

# Test pipeline: same deterministic preprocessing as validation.
batch_size = 10
test_ds = _labeled_files_ds('test')\
              .map(load_image_val, num_parallel_calls=16)\
              .batch(batch_size)\
              .repeat()

# Preview one training batch: show the first 10 augmented images with labels.
for images, labels in train_ds.take(1):
    fig, ax = plt.subplots(1, 10, figsize=(20, 6))
    for j in range(10):
        sample = images[j].numpy()
        # Rescale into [0, 1] so imshow renders the normalized tensor sensibly.
        sample = np.clip(sample / np.amax(sample), 0, 1)
        ax[j].imshow(sample)
        ax[j].set_title(labels[j].numpy())
plt.show()
# Checkpoint files are numbered by epoch via the {epoch:04d} placeholder.
checkpoint_path = "./train/x-ray/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Save weights only (not the full model), and only when val_loss improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 verbose=1)
# NOTE(review): this cell recomputes exactly the same counts as earlier in
# the file; the *2 assumes NORMAL and PNEUMONIA folders are equal in size.
base_dir = "chest_xray/reduced size/"
train_len = len(glob(os.path.join(base_dir, 'train', 'NORMAL', '*.jpeg'))) * 2
val_len = len(glob(os.path.join(base_dir, 'val', 'NORMAL', '*.jpeg'))) * 2
test_len = len(glob(os.path.join(base_dir, 'test', 'NORMAL', '*.jpeg'))) * 2
train_len  # notebook-style echo; evaluated to 2000 in the recorded run
2000
# FIX: train_len/20 and val_len/20 are floats (true division); Keras expects
# integer steps_per_epoch/validation_steps, so use floor division. 20 matches
# the batch size used when the train/val tf.data pipelines were batched.
history = model.fit(train_ds,
          steps_per_epoch=train_len // 20,
          validation_data=val_ds,
          validation_steps=val_len // 20,
          epochs=50,
          verbose=1,
          callbacks=[cp_callback]
          )
Epoch 1/50
100/100 [==============================] - 87s 861ms/step - loss: 0.3496 - accuracy: 0.8715 - val_loss: 2.2965 - val_accuracy: 0.5000

Epoch 00001: val_loss improved from inf to 2.29648, saving model to ./train/x-ray\cp-0001.ckpt
Epoch 2/50
100/100 [==============================] - 90s 892ms/step - loss: 0.1919 - accuracy: 0.9310 - val_loss: 2.8285 - val_accuracy: 0.5000

Epoch 00002: val_loss did not improve from 2.29648
Epoch 3/50
100/100 [==============================] - 89s 885ms/step - loss: 0.1472 - accuracy: 0.9465 - val_loss: 3.3786 - val_accuracy: 0.5000

Epoch 00003: val_loss did not improve from 2.29648
Epoch 4/50
100/100 [==============================] - 90s 902ms/step - loss: 0.1542 - accuracy: 0.9470 - val_loss: 3.1765 - val_accuracy: 0.5000

Epoch 00004: val_loss did not improve from 2.29648
Epoch 5/50
100/100 [==============================] - 89s 889ms/step - loss: 0.1220 - accuracy: 0.9575 - val_loss: 3.1865 - val_accuracy: 0.5000

Epoch 00005: val_loss did not improve from 2.29648
Epoch 6/50
100/100 [==============================] - 104s 1s/step - loss: 0.1135 - accuracy: 0.9535 - val_loss: 2.5589 - val_accuracy: 0.5200

Epoch 00006: val_loss did not improve from 2.29648
Epoch 7/50
100/100 [==============================] - 107s 1s/step - loss: 0.1260 - accuracy: 0.9485 - val_loss: 2.1608 - val_accuracy: 0.5500

Epoch 00007: val_loss improved from 2.29648 to 2.16084, saving model to ./train/x-ray\cp-0007.ckpt
Epoch 8/50
100/100 [==============================] - 109s 1s/step - loss: 0.1193 - accuracy: 0.9590 - val_loss: 0.9770 - val_accuracy: 0.6750

Epoch 00008: val_loss improved from 2.16084 to 0.97696, saving model to ./train/x-ray\cp-0008.ckpt
Epoch 9/50
100/100 [==============================] - 110s 1s/step - loss: 0.1155 - accuracy: 0.9565 - val_loss: 2.2169 - val_accuracy: 0.5750

Epoch 00009: val_loss did not improve from 0.97696
Epoch 10/50
100/100 [==============================] - 107s 1s/step - loss: 0.1043 - accuracy: 0.9650 - val_loss: 1.9094 - val_accuracy: 0.6100

Epoch 00010: val_loss did not improve from 0.97696
Epoch 11/50
100/100 [==============================] - 105s 1s/step - loss: 0.1095 - accuracy: 0.9585 - val_loss: 2.1666 - val_accuracy: 0.5850

Epoch 00011: val_loss did not improve from 0.97696
Epoch 12/50
100/100 [==============================] - 85s 845ms/step - loss: 0.1155 - accuracy: 0.9545 - val_loss: 0.7729 - val_accuracy: 0.7650

Epoch 00012: val_loss improved from 0.97696 to 0.77294, saving model to ./train/x-ray\cp-0012.ckpt
Epoch 13/50
100/100 [==============================] - 84s 837ms/step - loss: 0.0800 - accuracy: 0.9735 - val_loss: 0.9975 - val_accuracy: 0.6950

Epoch 00013: val_loss did not improve from 0.77294
Epoch 14/50
100/100 [==============================] - 85s 848ms/step - loss: 0.0758 - accuracy: 0.9725 - val_loss: 2.3209 - val_accuracy: 0.6150

Epoch 00014: val_loss did not improve from 0.77294
Epoch 15/50
100/100 [==============================] - 86s 854ms/step - loss: 0.0912 - accuracy: 0.9645 - val_loss: 1.2139 - val_accuracy: 0.6350

Epoch 00015: val_loss did not improve from 0.77294
Epoch 16/50
100/100 [==============================] - 86s 852ms/step - loss: 0.0764 - accuracy: 0.9720 - val_loss: 1.4312 - val_accuracy: 0.6500

Epoch 00016: val_loss did not improve from 0.77294
Epoch 17/50
100/100 [==============================] - 85s 850ms/step - loss: 0.0807 - accuracy: 0.9675 - val_loss: 3.3588 - val_accuracy: 0.5300

Epoch 00017: val_loss did not improve from 0.77294
Epoch 18/50
100/100 [==============================] - 85s 845ms/step - loss: 0.0813 - accuracy: 0.9715 - val_loss: 0.8275 - val_accuracy: 0.7300

Epoch 00018: val_loss did not improve from 0.77294
Epoch 19/50
100/100 [==============================] - 85s 848ms/step - loss: 0.0810 - accuracy: 0.9680 - val_loss: 1.3656 - val_accuracy: 0.6500

Epoch 00019: val_loss did not improve from 0.77294
Epoch 20/50
100/100 [==============================] - 100s 996ms/step - loss: 0.0676 - accuracy: 0.9775 - val_loss: 1.9736 - val_accuracy: 0.6200

Epoch 00020: val_loss did not improve from 0.77294
Epoch 21/50
100/100 [==============================] - 106s 1s/step - loss: 0.0744 - accuracy: 0.9725 - val_loss: 0.9104 - val_accuracy: 0.7250

Epoch 00021: val_loss did not improve from 0.77294
Epoch 22/50
100/100 [==============================] - 108s 1s/step - loss: 0.0563 - accuracy: 0.9820 - val_loss: 1.5101 - val_accuracy: 0.6550

Epoch 00022: val_loss did not improve from 0.77294
Epoch 23/50
100/100 [==============================] - 105s 1s/step - loss: 0.0644 - accuracy: 0.9770 - val_loss: 1.8021 - val_accuracy: 0.6250

Epoch 00023: val_loss did not improve from 0.77294
Epoch 24/50
100/100 [==============================] - 108s 1s/step - loss: 0.0839 - accuracy: 0.9655 - val_loss: 0.9646 - val_accuracy: 0.7100

Epoch 00024: val_loss did not improve from 0.77294
Epoch 25/50
100/100 [==============================] - 109s 1s/step - loss: 0.0726 - accuracy: 0.9680 - val_loss: 2.0138 - val_accuracy: 0.6400

Epoch 00025: val_loss did not improve from 0.77294
Epoch 26/50
100/100 [==============================] - 90s 894ms/step - loss: 0.0601 - accuracy: 0.9780 - val_loss: 1.2048 - val_accuracy: 0.6900

Epoch 00026: val_loss did not improve from 0.77294
Epoch 27/50
100/100 [==============================] - 86s 860ms/step - loss: 0.0655 - accuracy: 0.9740 - val_loss: 1.2675 - val_accuracy: 0.6950

Epoch 00027: val_loss did not improve from 0.77294
Epoch 28/50
100/100 [==============================] - 86s 853ms/step - loss: 0.0496 - accuracy: 0.9805 - val_loss: 1.4657 - val_accuracy: 0.6700

Epoch 00028: val_loss did not improve from 0.77294
Epoch 29/50
100/100 [==============================] - 85s 846ms/step - loss: 0.0565 - accuracy: 0.9770 - val_loss: 1.8231 - val_accuracy: 0.6400

Epoch 00029: val_loss did not improve from 0.77294
Epoch 30/50
100/100 [==============================] - 85s 846ms/step - loss: 0.0573 - accuracy: 0.9790 - val_loss: 2.2683 - val_accuracy: 0.6200

Epoch 00030: val_loss did not improve from 0.77294
Epoch 31/50
100/100 [==============================] - 85s 847ms/step - loss: 0.0561 - accuracy: 0.9810 - val_loss: 1.2069 - val_accuracy: 0.6850

Epoch 00031: val_loss did not improve from 0.77294
Epoch 32/50
100/100 [==============================] - 85s 849ms/step - loss: 0.0686 - accuracy: 0.9765 - val_loss: 1.1702 - val_accuracy: 0.7000

Epoch 00032: val_loss did not improve from 0.77294
Epoch 33/50
100/100 [==============================] - 85s 848ms/step - loss: 0.0517 - accuracy: 0.9835 - val_loss: 1.3022 - val_accuracy: 0.6950

Epoch 00033: val_loss did not improve from 0.77294
Epoch 34/50
100/100 [==============================] - 86s 860ms/step - loss: 0.0606 - accuracy: 0.9775 - val_loss: 0.5868 - val_accuracy: 0.7500

Epoch 00034: val_loss improved from 0.77294 to 0.58676, saving model to ./train/x-ray\cp-0034.ckpt
Epoch 35/50
100/100 [==============================] - 85s 849ms/step - loss: 0.0559 - accuracy: 0.9825 - val_loss: 1.5848 - val_accuracy: 0.6600

Epoch 00035: val_loss did not improve from 0.58676
Epoch 36/50
100/100 [==============================] - 85s 847ms/step - loss: 0.0418 - accuracy: 0.9830 - val_loss: 1.0125 - val_accuracy: 0.7200

Epoch 00036: val_loss did not improve from 0.58676
Epoch 37/50
100/100 [==============================] - 85s 848ms/step - loss: 0.0603 - accuracy: 0.9745 - val_loss: 1.2868 - val_accuracy: 0.7050

Epoch 00037: val_loss did not improve from 0.58676
Epoch 38/50
100/100 [==============================] - 85s 848ms/step - loss: 0.0506 - accuracy: 0.9815 - val_loss: 1.8795 - val_accuracy: 0.6450

Epoch 00038: val_loss did not improve from 0.58676
Epoch 39/50
100/100 [==============================] - 85s 844ms/step - loss: 0.0599 - accuracy: 0.9765 - val_loss: 0.9368 - val_accuracy: 0.7400

Epoch 00039: val_loss did not improve from 0.58676
Epoch 40/50
100/100 [==============================] - 85s 847ms/step - loss: 0.0452 - accuracy: 0.9825 - val_loss: 2.4980 - val_accuracy: 0.6500

Epoch 00040: val_loss did not improve from 0.58676
Epoch 41/50
100/100 [==============================] - 84s 835ms/step - loss: 0.0404 - accuracy: 0.9835 - val_loss: 1.2629 - val_accuracy: 0.7100

Epoch 00041: val_loss did not improve from 0.58676
Epoch 42/50
100/100 [==============================] - 86s 852ms/step - loss: 0.0493 - accuracy: 0.9805 - val_loss: 1.6048 - val_accuracy: 0.6450

Epoch 00042: val_loss did not improve from 0.58676
Epoch 43/50
100/100 [==============================] - 96s 955ms/step - loss: 0.0443 - accuracy: 0.9820 - val_loss: 2.6243 - val_accuracy: 0.6400

Epoch 00043: val_loss did not improve from 0.58676
Epoch 44/50
100/100 [==============================] - 105s 1s/step - loss: 0.0480 - accuracy: 0.9830 - val_loss: 2.7355 - val_accuracy: 0.5800

Epoch 00044: val_loss did not improve from 0.58676
Epoch 45/50
100/100 [==============================] - 106s 1s/step - loss: 0.0397 - accuracy: 0.9865 - val_loss: 1.8251 - val_accuracy: 0.6400

Epoch 00045: val_loss did not improve from 0.58676
Epoch 46/50
100/100 [==============================] - 105s 1s/step - loss: 0.0445 - accuracy: 0.9850 - val_loss: 0.9322 - val_accuracy: 0.7250

Epoch 00046: val_loss did not improve from 0.58676
Epoch 47/50
100/100 [==============================] - 108s 1s/step - loss: 0.0409 - accuracy: 0.9830 - val_loss: 1.3058 - val_accuracy: 0.7000

Epoch 00047: val_loss did not improve from 0.58676
Epoch 48/50
100/100 [==============================] - 108s 1s/step - loss: 0.0422 - accuracy: 0.9835 - val_loss: 2.7299 - val_accuracy: 0.5900

Epoch 00048: val_loss did not improve from 0.58676
Epoch 49/50
100/100 [==============================] - 105s 1s/step - loss: 0.0475 - accuracy: 0.9805 - val_loss: 0.9925 - val_accuracy: 0.7850

Epoch 00049: val_loss did not improve from 0.58676
Epoch 50/50
100/100 [==============================] - 81s 809ms/step - loss: 0.0427 - accuracy: 0.9845 - val_loss: 0.6244 - val_accuracy: 0.7850

Epoch 00050: val_loss did not improve from 0.58676
from matplotlib.pyplot import figure
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def plot_metrics(history):
  """Plot train/val curves for loss and accuracy from a Keras History.

  Train is drawn solid, validation dashed (same color, distinguished by
  linestyle).
  """
  metrics = ['loss', 'accuracy']
  # FIX: the figure must be created once, before the loop. Creating a fresh
  # figure on every iteration orphaned the first subplot, so the two metrics
  # ended up on separate figures instead of side by side.
  figure(figsize=(20, 8))
  for n, metric in enumerate(metrics):
    name = metric.replace("_"," ").capitalize()
    plt.subplot(1,2,n+1)
    plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')
    plt.plot(history.epoch, history.history['val_'+metric],
             color=colors[0], linestyle="--", label='Val')
    plt.xlabel('Epoch')
    plt.ylabel(name)
    if metric == 'loss':
      plt.ylim([0, plt.ylim()[1]])
    elif metric == 'auc':
      plt.ylim([0.8,1])
    else:
      plt.ylim([0,1])

    plt.legend()
plot_metrics(history)
model.summary()
Model: "Cat_Dog_CNN"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv (Conv)                  (None, None, None, 32)    448       
_________________________________________________________________
conv_1 (Conv)                (None, None, None, 64)    18752     
_________________________________________________________________
conv_2 (Conv)                (None, None, None, 128)   74368     
_________________________________________________________________
conv_3 (Conv)                (None, None, None, 128)   148096    
_________________________________________________________________
flatten (Flatten)            (None, None)              0         
_________________________________________________________________
dense (Dense)                (None, 512)               3211776   
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 1026      
=================================================================
Total params: 3,454,466
Trainable params: 3,453,762
Non-trainable params: 704
_________________________________________________________________
test_ds  # notebook-style echo of the dataset spec (shapes/dtypes shown below)
<RepeatDataset shapes: ((None, 148, 148, None), (None,)), types: (tf.float32, tf.int32)>
# One pass over the repeating test set: 200 samples / 10 per batch = 20 steps.
model.evaluate(test_ds, steps=int(test_len/batch_size))
20/20 [==============================] - 2s 112ms/step - loss: 0.2884 - accuracy: 0.8950
[0.2883862555027008, 0.8949999809265137]
test_len  # notebook-style echo of the test-sample count
200
""" Steps should be equal to total samples (including both of the folders inside test folder) 
divided by batch size"""
batch_size = 10
predictions = model.predict(test_ds, steps=int(test_len/batch_size))
predictions
array([[9.85657334e-01, 1.43426713e-02],
       [9.99776781e-01, 2.23287541e-04],
       [8.64409924e-01, 1.35590136e-01],
       [9.99276578e-01, 7.23473204e-04],
       [9.91437137e-01, 8.56289174e-03],
       [4.97444481e-01, 5.02555490e-01],
       [9.86491621e-01, 1.35083832e-02],
       [9.99639750e-01, 3.60298000e-04],
       [9.93371844e-01, 6.62815524e-03],
       [9.93454635e-01, 6.54536439e-03],
       [9.99535680e-01, 4.64307755e-04],
       [9.86793697e-01, 1.32063385e-02],
       [9.98911142e-01, 1.08882831e-03],
       [7.32735872e-01, 2.67264098e-01],
       [9.74538684e-01, 2.54613403e-02],
       [9.60229278e-01, 3.97707224e-02],
       [9.66539204e-01, 3.34607884e-02],
       [9.86298084e-01, 1.37019679e-02],
       [9.99833584e-01, 1.66425540e-04],
       [7.65602887e-01, 2.34397158e-01],
       [6.70023382e-01, 3.29976588e-01],
       [9.95202422e-01, 4.79759090e-03],
       [9.55268025e-01, 4.47319858e-02],
       [9.08097625e-01, 9.19024050e-02],
       [8.19931269e-01, 1.80068731e-01],
       [9.99431670e-01, 5.68260264e-04],
       [9.99796569e-01, 2.03445801e-04],
       [9.99282300e-01, 7.17661052e-04],
       [9.99994278e-01, 5.68568203e-06],
       [7.48119056e-01, 2.51881003e-01],
       [9.83032346e-01, 1.69676840e-02],
       [9.99669790e-01, 3.30263254e-04],
       [8.51692975e-01, 1.48306996e-01],
       [9.23557878e-01, 7.64420778e-02],
       [9.05704498e-01, 9.42955092e-02],
       [9.99992371e-01, 7.66991616e-06],
       [9.94957745e-01, 5.04231080e-03],
       [9.19523537e-01, 8.04765150e-02],
       [2.39147276e-01, 7.60852695e-01],
       [9.99994993e-01, 4.98324152e-06],
       [9.99448359e-01, 5.51683363e-04],
       [7.54624188e-01, 2.45375797e-01],
       [1.13067344e-01, 8.86932611e-01],
       [9.74270821e-01, 2.57292371e-02],
       [8.35689604e-01, 1.64310351e-01],
       [9.67578530e-01, 3.24214920e-02],
       [8.55771184e-01, 1.44228816e-01],
       [9.96430874e-01, 3.56906885e-03],
       [9.94876444e-01, 5.12352493e-03],
       [9.99932289e-01, 6.77012649e-05],
       [9.70197856e-01, 2.98021659e-02],
       [9.99202430e-01, 7.97536224e-04],
       [9.14013460e-02, 9.08598661e-01],
       [9.81141210e-01, 1.88587606e-02],
       [1.12472055e-02, 9.88752723e-01],
       [9.97602046e-01, 2.39792606e-03],
       [9.99979377e-01, 2.06440855e-05],
       [9.99268353e-01, 7.31674198e-04],
       [9.99997854e-01, 2.13386556e-06],
       [9.70610023e-01, 2.93899346e-02],
       [7.94489443e-01, 2.05510542e-01],
       [7.33211543e-03, 9.92667854e-01],
       [9.75319624e-01, 2.46804170e-02],
       [1.97908074e-01, 8.02091956e-01],
       [8.84383380e-01, 1.15616612e-01],
       [9.99961138e-01, 3.88987137e-05],
       [9.91863012e-01, 8.13699979e-03],
       [9.99801457e-01, 1.98573107e-04],
       [6.90290034e-01, 3.09709996e-01],
       [9.99935389e-01, 6.45479959e-05],
       [9.48556125e-01, 5.14439307e-02],
       [9.76195812e-01, 2.38042045e-02],
       [9.99999523e-01, 5.08934647e-07],
       [8.19931269e-01, 1.80068731e-01],
       [9.98999894e-01, 1.00009795e-03],
       [6.26317561e-01, 3.73682439e-01],
       [9.79074776e-01, 2.09252276e-02],
       [9.99930382e-01, 6.96428106e-05],
       [9.99850988e-01, 1.48991836e-04],
       [2.74874060e-03, 9.97251213e-01],
       [4.57723022e-01, 5.42276978e-01],
       [9.99959469e-01, 4.05785177e-05],
       [8.28060329e-01, 1.71939671e-01],
       [9.99933839e-01, 6.61413360e-05],
       [9.98803616e-01, 1.19643949e-03],
       [9.84709024e-01, 1.52909458e-02],
       [6.26547456e-01, 3.73452604e-01],
       [9.99979734e-01, 2.03010804e-05],
       [9.99873400e-01, 1.26630621e-04],
       [4.78458375e-01, 5.21541536e-01],
       [9.96844172e-01, 3.15584964e-03],
       [9.99795258e-01, 2.04735581e-04],
       [4.71913606e-01, 5.28086483e-01],
       [9.94498432e-01, 5.50155155e-03],
       [9.90329504e-01, 9.67049971e-03],
       [9.99999762e-01, 2.64577835e-07],
       [9.40155387e-01, 5.98445721e-02],
       [9.96164680e-01, 3.83532280e-03],
       [9.64362323e-01, 3.56376134e-02],
       [8.73457611e-01, 1.26542464e-01],
       [1.69541419e-03, 9.98304605e-01],
       [5.51110588e-06, 9.99994516e-01],
       [8.85826796e-02, 9.11417246e-01],
       [1.32892319e-05, 9.99986768e-01],
       [6.64665131e-05, 9.99933481e-01],
       [4.88702790e-04, 9.99511242e-01],
       [8.98612976e-01, 1.01387009e-01],
       [7.13682398e-02, 9.28631723e-01],
       [9.86585259e-01, 1.34147303e-02],
       [1.39485730e-03, 9.98605192e-01],
       [2.63140917e-01, 7.36859083e-01],
       [1.76326407e-03, 9.98236775e-01],
       [3.41288105e-05, 9.99965906e-01],
       [1.20350353e-01, 8.79649639e-01],
       [2.79125234e-04, 9.99720871e-01],
       [9.89445616e-05, 9.99901056e-01],
       [4.43055853e-03, 9.95569408e-01],
       [1.01792524e-02, 9.89820719e-01],
       [3.83035233e-03, 9.96169627e-01],
       [2.98472762e-04, 9.99701440e-01],
       [3.84452264e-03, 9.96155441e-01],
       [1.38619207e-02, 9.86138046e-01],
       [2.12187283e-02, 9.78781283e-01],
       [7.06023071e-03, 9.92939711e-01],
       [9.60930288e-01, 3.90697718e-02],
       [9.44973342e-03, 9.90550280e-01],
       [5.40239096e-01, 4.59760875e-01],
       [1.16611621e-03, 9.98833835e-01],
       [7.99014833e-06, 9.99992013e-01],
       [1.02940416e-04, 9.99897003e-01],
       [3.85035586e-04, 9.99614954e-01],
       [7.17576081e-03, 9.92824197e-01],
       [7.48903573e-01, 2.51096487e-01],
       [4.76671085e-02, 9.52332854e-01],
       [4.58785553e-06, 9.99995470e-01],
       [4.24756181e-05, 9.99957561e-01],
       [1.46502256e-01, 8.53497803e-01],
       [1.72045966e-05, 9.99982834e-01],
       [6.45562774e-03, 9.93544340e-01],
       [6.80038799e-03, 9.93199646e-01],
       [9.02544853e-05, 9.99909759e-01],
       [1.08804239e-03, 9.98911977e-01],
       [7.08077908e-01, 2.91922122e-01],
       [7.97863235e-04, 9.99202192e-01],
       [3.54133570e-03, 9.96458709e-01],
       [8.83329570e-01, 1.16670460e-01],
       [5.36343083e-04, 9.99463618e-01],
       [8.08406528e-03, 9.91915882e-01],
       [7.92855339e-04, 9.99207199e-01],
       [1.05939853e-05, 9.99989390e-01],
       [1.25802336e-02, 9.87419784e-01],
       [6.50432259e-02, 9.34956789e-01],
       [1.50379464e-01, 8.49620581e-01],
       [9.07501671e-04, 9.99092460e-01],
       [9.93537344e-03, 9.90064681e-01],
       [7.35002477e-03, 9.92649972e-01],
       [5.62144350e-03, 9.94378507e-01],
       [2.56918956e-05, 9.99974251e-01],
       [9.05568339e-03, 9.90944266e-01],
       [1.48160500e-04, 9.99851823e-01],
       [1.49371290e-05, 9.99985099e-01],
       [3.99887422e-03, 9.96001065e-01],
       [1.59233436e-01, 8.40766549e-01],
       [1.36563471e-02, 9.86343622e-01],
       [1.01777585e-02, 9.89822268e-01],
       [1.26898989e-01, 8.73101056e-01],
       [2.93411734e-03, 9.97065842e-01],
       [1.42731369e-04, 9.99857306e-01],
       [4.03385711e-05, 9.99959707e-01],
       [2.93119811e-04, 9.99706805e-01],
       [2.30396073e-03, 9.97695982e-01],
       [1.83795422e-01, 8.16204607e-01],
       [1.48447510e-03, 9.98515546e-01],
       [5.91264688e-04, 9.99408722e-01],
       [4.48765677e-05, 9.99955177e-01],
       [1.52140425e-03, 9.98478591e-01],
       [2.15825916e-04, 9.99784172e-01],
       [3.09880115e-02, 9.69012022e-01],
       [3.01997602e-01, 6.98002398e-01],
       [6.44010652e-06, 9.99993563e-01],
       [6.29856065e-02, 9.37014341e-01],
       [3.50856334e-02, 9.64914322e-01],
       [9.15549994e-01, 8.44499767e-02],
       [1.54254362e-02, 9.84574616e-01],
       [3.65524925e-02, 9.63447452e-01],
       [1.11409612e-02, 9.88859057e-01],
       [1.91820621e-01, 8.08179379e-01],
       [1.21757970e-03, 9.98782456e-01],
       [6.78364770e-04, 9.99321699e-01],
       [3.66500437e-01, 6.33499503e-01],
       [6.23298824e-01, 3.76701176e-01],
       [6.47320509e-01, 3.52679491e-01],
       [1.66842653e-06, 9.99998331e-01],
       [8.69961455e-02, 9.13003862e-01],
       [3.86185795e-01, 6.13814294e-01],
       [1.19015481e-03, 9.98809814e-01],
       [7.51811713e-02, 9.24818814e-01],
       [6.69739366e-01, 3.30260664e-01],
       [1.10908980e-02, 9.88909066e-01],
       [1.37867119e-05, 9.99986172e-01]], dtype=float32)
predictions.shape
(200, 2)

We need to convert the predictions to binary class labels before computing a classification report.

# Collapse the two-column softmax output into hard labels
# (0 = NORMAL, 1 = PNEUMONIA); ties go to class 1, matching the original loop.
pred = [0 if float(row[0]) > float(row[1]) else 1 for row in predictions]
[0,
 0,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 0,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 0,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 1,
 0,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 1,
 1,
 1,
 1,
 1,
 1,
 0,
 0,
 1,
 1,
 1,
 1,
 1,
 0,
 1,
 1]
# predictions
i = 0
p = []

# NOTE(review): `take(n)` yields at most n batches from test_ds; it only
# wraps back to the first sample if the dataset itself was built with
# `.repeat()` — confirm how test_ds was constructed upstream.
num_batches = int(test_len / batch_size)

for images, labels in test_ds.take(num_batches):
    i += 1
    # Run the model on this batch and keep the raw probabilities for later.
    predictions = model(images)
    p.append(predictions)

    # One row of `batch_size` axes: the subplot count matches the batch
    # size, while `num_batches` above controls how many batches we visit.
    fig, ax = plt.subplots(1, batch_size, figsize=(20, 6))

    for j in range(batch_size):  # one subplot per image in the batch
        # Rescale to [0, 1] so imshow renders the x-ray correctly.
        image = np.clip(images[j].numpy() / np.amax(images[j].numpy()), 0, 1)
        ax[j].imshow(image)

        # Per-class probabilities for this image, rounded for the title.
        probs = predictions[j]
        normal_prob = round(float(probs[0]), 2)
        pneumonia_prob = round(float(probs[1]), 2)
        ax[j].set_title(" gnd = {},\n  n = {},\n p = {}".format(labels[j].numpy(),
                                                                normal_prob, pneumonia_prob))

#         ax[j].set_title(labels[j].numpy())
plt.show()
predictions
<tf.Tensor: shape=(10, 2), dtype=float32, numpy=
array([[4.37334005e-04, 9.99562681e-01],
       [3.00131738e-02, 9.69986796e-01],
       [3.81141878e-03, 9.96188581e-01],
       [2.03762911e-02, 9.79623735e-01],
       [1.32271822e-03, 9.98677313e-01],
       [1.20561704e-01, 8.79438281e-01],
       [6.62317143e-06, 9.99993324e-01],
       [3.21331390e-05, 9.99967813e-01],
       [2.24176901e-07, 9.99999762e-01],
       [1.15374448e-02, 9.88462508e-01]], dtype=float32)>
p
[<tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.9997640e-01, 2.3611767e-05],
        [9.9460572e-01, 5.3943270e-03],
        [1.9056603e-02, 9.8094338e-01],
        [9.9662638e-01, 3.3736371e-03],
        [3.6160144e-01, 6.3839853e-01],
        [9.8269629e-01, 1.7303696e-02],
        [9.9989533e-01, 1.0463434e-04],
        [9.9222624e-01, 7.7737737e-03],
        [9.9708241e-01, 2.9175165e-03],
        [9.3685031e-01, 6.3149773e-02]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[2.07020372e-01, 7.92979658e-01],
        [8.77992570e-01, 1.22007392e-01],
        [9.98836935e-01, 1.16312108e-03],
        [2.96648383e-01, 7.03351617e-01],
        [9.99999046e-01, 9.17801970e-07],
        [9.99280870e-01, 7.19166943e-04],
        [9.99985695e-01, 1.42726085e-05],
        [1.91694841e-01, 8.08305144e-01],
        [9.87044752e-01, 1.29553061e-02],
        [7.77511252e-03, 9.92224932e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.99583781e-01, 4.16221999e-04],
        [2.32120156e-02, 9.76787925e-01],
        [1.71785976e-03, 9.98282194e-01],
        [8.23590040e-01, 1.76409945e-01],
        [9.99817908e-01, 1.82167278e-04],
        [1.18535578e-01, 8.81464422e-01],
        [4.92661744e-02, 9.50733840e-01],
        [9.89264965e-01, 1.07350005e-02],
        [9.99361217e-01, 6.38783094e-04],
        [2.18322512e-05, 9.99978185e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.5074439e-01, 4.9255610e-02],
        [1.9186202e-02, 9.8081380e-01],
        [9.5544562e-02, 9.0445542e-01],
        [7.9370022e-01, 2.0629981e-01],
        [9.0158397e-01, 9.8416001e-02],
        [9.9509758e-01, 4.9023991e-03],
        [9.9992764e-01, 7.2373368e-05],
        [8.8502324e-01, 1.1497678e-01],
        [7.1819514e-01, 2.8180492e-01],
        [4.8267615e-01, 5.1732391e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.9998116e-01, 1.8830309e-05],
        [7.0943815e-01, 2.9056183e-01],
        [9.9975425e-01, 2.4571409e-04],
        [8.3195549e-01, 1.6804454e-01],
        [9.9665976e-01, 3.3402327e-03],
        [9.7706670e-01, 2.2933310e-02],
        [4.8827454e-03, 9.9511731e-01],
        [2.9929075e-01, 7.0070922e-01],
        [1.4642225e-01, 8.5357773e-01],
        [8.6713964e-01, 1.3286045e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[5.7048064e-02, 9.4295192e-01],
        [6.8188381e-01, 3.1811628e-01],
        [9.9993467e-01, 6.5311811e-05],
        [9.9801946e-01, 1.9805981e-03],
        [9.9940062e-01, 5.9934362e-04],
        [4.8240823e-01, 5.1759177e-01],
        [9.9987769e-01, 1.2231751e-04],
        [7.5315982e-01, 2.4684025e-01],
        [9.9860138e-01, 1.3986370e-03],
        [1.4008899e-04, 9.9985993e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.7329539e-01, 2.6704585e-02],
        [9.9953806e-01, 4.6195605e-04],
        [2.5956589e-01, 7.4043417e-01],
        [9.6730834e-01, 3.2691710e-02],
        [9.9542809e-01, 4.5719543e-03],
        [9.9979073e-01, 2.0929299e-04],
        [2.2868372e-01, 7.7131623e-01],
        [7.2618510e-04, 9.9927384e-01],
        [3.1125569e-01, 6.8874431e-01],
        [9.5836413e-01, 4.1635927e-02]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[0.13144295, 0.86855704],
        [0.36679748, 0.6332025 ],
        [0.8150542 , 0.18494579],
        [0.20597757, 0.79402244],
        [0.87983507, 0.12016494],
        [0.41560125, 0.58439875],
        [0.9981335 , 0.0018666 ],
        [0.56172115, 0.43827885],
        [0.99794143, 0.00205855],
        [0.80389744, 0.19610254]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[1.6033353e-02, 9.8396665e-01],
        [9.9982208e-01, 1.7793862e-04],
        [9.6246356e-01, 3.7536424e-02],
        [1.3810484e-03, 9.9861896e-01],
        [8.7185383e-02, 9.1281468e-01],
        [9.6629500e-01, 3.3704951e-02],
        [9.9177599e-01, 8.2239853e-03],
        [2.3377632e-01, 7.6622373e-01],
        [9.9936086e-01, 6.3911802e-04],
        [4.1348359e-01, 5.8651638e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[3.1530213e-02, 9.6846974e-01],
        [9.4895828e-01, 5.1041774e-02],
        [2.8660566e-01, 7.1339428e-01],
        [2.7487752e-01, 7.2512245e-01],
        [7.2765017e-01, 2.7234983e-01],
        [1.0000000e+00, 5.1669875e-08],
        [7.1596634e-01, 2.8403369e-01],
        [7.4264830e-01, 2.5735161e-01],
        [4.4818640e-01, 5.5181354e-01],
        [9.2353189e-01, 7.6468110e-02]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[5.1465398e-03, 9.9485344e-01],
        [1.9237743e-03, 9.9807620e-01],
        [7.3257961e-06, 9.9999273e-01],
        [1.8967977e-08, 1.0000000e+00],
        [1.8141576e-07, 9.9999976e-01],
        [1.7709215e-04, 9.9982291e-01],
        [1.2723017e-05, 9.9998724e-01],
        [1.7405267e-05, 9.9998260e-01],
        [6.4199250e-03, 9.9358010e-01],
        [5.5756314e-05, 9.9994421e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[2.7713349e-07, 9.9999976e-01],
        [4.6796207e-03, 9.9532038e-01],
        [6.0605096e-05, 9.9993944e-01],
        [1.1060315e-06, 9.9999893e-01],
        [1.4968660e-04, 9.9985027e-01],
        [9.7200825e-07, 9.9999905e-01],
        [4.2236229e-06, 9.9999583e-01],
        [1.9549063e-06, 9.9999809e-01],
        [3.3796155e-07, 9.9999964e-01],
        [2.2908565e-04, 9.9977094e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[6.3038719e-01, 3.6961281e-01],
        [1.2336066e-02, 9.8766392e-01],
        [9.2167949e-04, 9.9907827e-01],
        [1.9604218e-01, 8.0395788e-01],
        [7.6305028e-03, 9.9236953e-01],
        [8.2812592e-05, 9.9991715e-01],
        [6.0462626e-05, 9.9993956e-01],
        [7.5546541e-04, 9.9924457e-01],
        [1.4876950e-01, 8.5123056e-01],
        [3.3262427e-04, 9.9966741e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[1.6045369e-03, 9.9839550e-01],
        [2.3169729e-05, 9.9997687e-01],
        [1.8939108e-03, 9.9810612e-01],
        [2.6002672e-04, 9.9973994e-01],
        [2.9599574e-05, 9.9997044e-01],
        [3.2343855e-03, 9.9676561e-01],
        [1.2464504e-04, 9.9987531e-01],
        [9.8485441e-04, 9.9901509e-01],
        [6.0801220e-05, 9.9993920e-01],
        [9.4231637e-03, 9.9057686e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[7.9513824e-01, 2.0486182e-01],
        [4.1434990e-04, 9.9958569e-01],
        [8.1938019e-08, 9.9999988e-01],
        [2.1870263e-05, 9.9997818e-01],
        [3.5994139e-02, 9.6400583e-01],
        [1.1750820e-05, 9.9998820e-01],
        [6.5872036e-06, 9.9999344e-01],
        [5.3617905e-04, 9.9946386e-01],
        [1.4401111e-04, 9.9985600e-01],
        [5.0544418e-06, 9.9999499e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[2.0332830e-03, 9.9796677e-01],
        [8.5219639e-05, 9.9991477e-01],
        [5.1000100e-01, 4.8999900e-01],
        [2.2640487e-03, 9.9773598e-01],
        [6.6506600e-06, 9.9999332e-01],
        [6.1609391e-02, 9.3839061e-01],
        [4.4349380e-04, 9.9955648e-01],
        [2.3848681e-06, 9.9999762e-01],
        [2.9000235e-03, 9.9710000e-01],
        [2.4177107e-01, 7.5822890e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[9.62878287e-01, 3.71217169e-02],
        [8.94213021e-01, 1.05786964e-01],
        [7.93836892e-01, 2.06163108e-01],
        [9.61671321e-05, 9.99903798e-01],
        [2.14240972e-05, 9.99978542e-01],
        [2.11853534e-03, 9.97881472e-01],
        [5.28566912e-03, 9.94714320e-01],
        [1.83256459e-04, 9.99816716e-01],
        [8.69176220e-05, 9.99913096e-01],
        [1.58263668e-02, 9.84173656e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[1.3441809e-04, 9.9986553e-01],
        [2.6691348e-06, 9.9999738e-01],
        [9.3518935e-02, 9.0648103e-01],
        [1.1311603e-03, 9.9886876e-01],
        [1.3115118e-01, 8.6884886e-01],
        [7.9298043e-05, 9.9992073e-01],
        [3.5523926e-04, 9.9964476e-01],
        [1.0311117e-04, 9.9989688e-01],
        [2.9476205e-04, 9.9970526e-01],
        [2.1862721e-03, 9.9781370e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[2.1129787e-05, 9.9997890e-01],
        [6.4583614e-06, 9.9999356e-01],
        [2.2637751e-03, 9.9773622e-01],
        [5.2859301e-05, 9.9994719e-01],
        [5.4847723e-04, 9.9945158e-01],
        [1.3405080e-08, 1.0000000e+00],
        [2.5200158e-02, 9.7479987e-01],
        [1.1127055e-07, 9.9999988e-01],
        [2.7627090e-04, 9.9972373e-01],
        [9.0521586e-04, 9.9909484e-01]], dtype=float32)>,
 <tf.Tensor: shape=(10, 2), dtype=float32, numpy=
 array([[4.37334005e-04, 9.99562681e-01],
        [3.00131738e-02, 9.69986796e-01],
        [3.81141878e-03, 9.96188581e-01],
        [2.03762911e-02, 9.79623735e-01],
        [1.32271822e-03, 9.98677313e-01],
        [1.20561704e-01, 8.79438281e-01],
        [6.62317143e-06, 9.99993324e-01],
        [3.21331390e-05, 9.99967813e-01],
        [2.24176901e-07, 9.99999762e-01],
        [1.15374448e-02, 9.88462508e-01]], dtype=float32)>]
len(p)
20
p[9].shape
TensorShape([10, 2])